I'm going to overwrite a lot of this notebook's old content. I changed the way I'm calculating wt, and want to test that my training worked.
In [1]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
In [2]:
import matplotlib
# Agg (headless) backend disabled — the inline backend below is used instead.
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
In [3]:
# NOTE(review): hardcoded absolute cluster path — not portable; consider a DATA_DIR config.
training_file = '/u/ki/swmclau2/des/PearceRedMagicWpCosmo.hdf5'
# Emulation method — 'rf' is presumably random forest (feature_importances_ is used below); confirm against pearce docs.
em_method = 'rf'
#split_method = 'random'
In [4]:
# Scale factor a = 1 corresponds to redshift z = 1/a - 1 = 0.
a = 1.0
z = 1.0/a - 1.0
In [5]:
# Fix redshift for the emulator; r is left free so we can emulate w.r.t. r below.
fixed_params = {'z':z}#, 'r':0.18477483}
In [6]:
emu.valid_methods
In [ ]:
emu = OriginalRecipe(training_file,method = em_method, fixed_params=fixed_params,
hyperparams = {'n_estimators':500, 'max_depth':5}, custom_mean_function = 'linear')
In [ ]:
# Score of the fitted regressor on its own training data — in-sample fit
# quality, not generalization (presumably sklearn's R^2; confirm).
emu._emulator.score(emu.x, emu.y)
In [ ]:
for name, imp in sorted(zip(emu.get_param_names(), emu._emulator.feature_importances_), key = lambda x: x[1], reverse = True):
print name, imp
In [ ]:
# Radial bin centers the emulator predicts at (r values; units presumably Mpc/h — confirm).
emu.scale_bin_centers
In [ ]:
# Names of the cosmology + HOD parameters the emulator was trained on.
emu.get_param_names()
In [ ]:
# NOTE(review): duplicate of the scale_bin_centers cell above — safe to delete.
emu.scale_bin_centers
In [ ]:
# Internal ordered parameter spec (presumably names + bounds — confirm against pearce).
emu._ordered_params
In [ ]:
# Fiducial test point for the emulator: cosmology parameters first,
# then the HOD (halo occupation) parameters.
params = {
    # cosmology
    'ombh2':      0.021,
    'omch2':      0.12,
    'w0':         -1,
    'ns':         0.9578462,
    'ln10As':     3.08,
    'H0':         68.1,
    'Neff':       3.04,
    # HOD
    'logM1':      14.0,
    'logMmin':    11.9,
    'logM0':      13.2,
    'sigma_logM': 0.12,
    'f_c':        0.3,
    'alpha':      1.1,
}
In [ ]:
# Emulator prediction at the fiducial `params`, evaluated at every scale bin
# center; [0] presumably selects the prediction vector from the returned tuple — confirm against pearce API.
wp = emu.emulate_wrt_r(params, emu.scale_bin_centers)[0]
In [ ]:
# Inspect the raw predicted values.
wp
In [ ]:
# Pick the idx-th training sample and reconstruct its physical parameter values
# by undoing the (x - mean) / std normalization stored on the emulator.
idx = 12
binlen = len(emu.scale_bin_centers)
# NOTE(review): this shadows the hand-picked `params` dict defined above.
# The last column of emu.x is excluded ([:-1]) — presumably the r coordinate; confirm.
params = {pname: p for pname, p in zip(emu.get_param_names(), emu._x_std[:-1]*emu.x[idx*binlen, :-1] + emu._x_mean[:-1])}
In [ ]:
# Show the reconstructed parameter dict (Python 2 print statement).
print params
In [ ]:
# Normalization constants applied to the target (y) values during training.
emu._y_std, emu._y_mean
In [ ]:
# Re-run the emulator at the reconstructed training-point parameters, so the
# prediction can be compared to the stored training truth below.
wp = emu.emulate_wrt_r(params,emu.scale_bin_centers)[0]
In [ ]:
wp
In [ ]:
# Compare the emulator prediction ('Emu') against the denormalized training
# truth ('Truth') for the chosen sample: the y slice covers the idx-th
# sample's radial bins, un-scaled by y_std/y_mean.
plt.plot(emu.scale_bin_centers, wp, label = 'Emu')
plt.plot(emu.scale_bin_centers, emu._y_std*emu.y[idx*binlen:(idx+1)*binlen]+emu._y_mean, label = 'Truth')
#plt.plot(emu.x[idx*binlen:(idx+1)*binlen, -1], lm_pred)
plt.xscale('log')
plt.xlabel(r'$r$ [Mpc]')
plt.ylabel(r'$w_p(r_p)$')
plt.legend(loc = 'best')
plt.show()
In [ ]:
In [ ]: